繼續昨天的工作:高可用 mon 群集已部署完成,接下來將其餘的 Ceph 元件(mgr、OSD、MDS)部署完畢,完成整個群集。
# Deploy the remaining Ceph daemons (mgr, OSDs, MDS) on every node.
# Required environment:
#   DOCKERIMAGE - ceph container image reference (e.g. ceph/daemon:latest)
#   PGNUM       - placement-group count for the CephFS data/metadata pools
# Fail fast if either is unset: an empty ${DOCKERIMAGE}/${PGNUM} would
# produce a mangled remote docker command — and zap_device below is
# destructive, so we must not proceed half-configured.
: "${DOCKERIMAGE:?DOCKERIMAGE must be set to the ceph container image}"
: "${PGNUM:?PGNUM must be set to the CephFS pool PG count}"

for NODE in master1 worker1 worker2
do
	# Start one mgr daemon per node; Ceph elects the active mgr and keeps
	# the rest as standbys. The remote command is double-quoted on purpose
	# so ${DOCKERIMAGE} expands on the LOCAL machine before ssh runs it.
	ssh "${NODE}" "
		docker run -d --net=host \
			--restart=always \
			-v /etc/ceph:/etc/ceph \
			-v /var/log/ceph/:/var/log/ceph/ \
			-v /var/lib/ceph/mgr:/var/lib/ceph/mgr \
			-v /var/lib/ceph/bootstrap-mds:/var/lib/ceph/bootstrap-mds \
			-v /var/lib/ceph/bootstrap-osd:/var/lib/ceph/bootstrap-osd \
			-v /var/lib/ceph/bootstrap-rbd:/var/lib/ceph/bootstrap-rbd \
			-v /var/lib/ceph/bootstrap-rgw:/var/lib/ceph/bootstrap-rgw \
			--name mgr \
			${DOCKERIMAGE} mgr
		" || { echo "ERROR: mgr deploy failed on ${NODE}" >&2; exit 1; }

	# One OSD per raw disk. WARNING: zap_device WIPES the device — the
	# list below must only contain disks dedicated to Ceph.
	for DEV in /dev/sdb /dev/sdc
	do
		OSDNAME=osd${DEV//\//-}   # e.g. /dev/sdb -> osd-dev-sdb
		# Clean any previous partition table / Ceph metadata from the disk.
		ssh "${NODE}" "
			docker run --rm --privileged=true \
				-v /dev/:/dev/ \
				-e OSD_DEVICE=$DEV \
				${DOCKERIMAGE} zap_device
			" || { echo "ERROR: zap of $DEV failed on ${NODE}" >&2; exit 1; }
		# --privileged + /dev mount are needed so the container can
		# partition and mount the raw device; --pid=host lets the OSD
		# see host processes for device handling.
		ssh "${NODE}" "
			docker run -d --net=host \
			    --pid=host \
			    --restart=always \
			    --privileged=true \
			    -v /etc/ceph:/etc/ceph \
			    -v /var/log/ceph/:/var/log/ceph/ \
			    -v /var/lib/ceph/bootstrap-mds:/var/lib/ceph/bootstrap-mds \
			    -v /var/lib/ceph/bootstrap-osd:/var/lib/ceph/bootstrap-osd \
			    -v /var/lib/ceph/bootstrap-rbd:/var/lib/ceph/bootstrap-rbd \
			    -v /var/lib/ceph/bootstrap-rgw:/var/lib/ceph/bootstrap-rgw \
			    -v /dev/:/dev/ \
			    -e OSD_DEVICE=$DEV \
			    --name $OSDNAME \
			    ${DOCKERIMAGE} osd
			" || { echo "ERROR: osd $OSDNAME deploy failed on ${NODE}" >&2; exit 1; }
	done

	# Start one MDS per node (one active, the rest standby). The first MDS
	# with CEPHFS_CREATE=1 creates the CephFS filesystem and its
	# data/metadata pools with ${PGNUM} placement groups each.
	ssh "${NODE}" "
		docker run -d --net=host \
			--restart=always \
			-v /etc/ceph:/etc/ceph \
			-v /var/log/ceph/:/var/log/ceph/ \
			-v /var/lib/ceph/mds:/var/lib/ceph/mds \
			-v /var/lib/ceph/bootstrap-mds:/var/lib/ceph/bootstrap-mds \
			-v /var/lib/ceph/bootstrap-osd:/var/lib/ceph/bootstrap-osd \
			-v /var/lib/ceph/bootstrap-rbd:/var/lib/ceph/bootstrap-rbd \
			-v /var/lib/ceph/bootstrap-rgw:/var/lib/ceph/bootstrap-rgw \
			-e CEPHFS_CREATE=1 \
			-e CEPHFS_DATA_POOL_PG=$PGNUM \
			-e CEPHFS_METADATA_POOL_PG=$PGNUM \
			--name mds \
			${DOCKERIMAGE} mds
		" || { echo "ERROR: mds deploy failed on ${NODE}" >&2; exit 1; }
done
$ docker exec mon ceph -s
  cluster:
    id:     324cfab0-b25b-4cee-9b1e-bed1c7a68c83
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum master1,worker1,worker2
    mgr: master1(active), standbys: worker1, worker2
    mds: cephfs-1/1/1 up  {0=master1=up:active}, 2 up:standby
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   2 pools, 128 pgs
    objects: 21 objects, 2.19KiB
    usage:   6.12GiB used, 43.9GiB / 50.0GiB avail
    pgs:     128 active+clean
說明部署結果: